bitkeeper revision 1.1159.24.1 (411a8dc04Gzs_coIAm_0Gf5NK-YpxQ)
author iap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>
Wed, 11 Aug 2004 21:21:04 +0000 (21:21 +0000)
committer iap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>
Wed, 11 Aug 2004 21:21:04 +0000 (21:21 +0000)
suspend/resume now works again, though netfront contains a workaround hack for a xend issue.

linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/main.c
linux-2.4.26-xen-sparse/arch/xen/kernel/setup.c
linux-2.6.7-xen-sparse/drivers/xen/netback/interface.c
linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c

index 0d5fd969687d7c7ae5e78d3d34ce910ae0302df5..c50d94c57d9919697fa9b56b7b433122c93077d1 100644 (file)
@@ -60,6 +60,34 @@ static int           sg_operation = -1;
 static unsigned long sg_next_sect;
 #define DISABLE_SCATTERGATHER() (sg_operation = -1)
 
+
+inline blkif_request_t *translate_req_to_pfn( blkif_request_t * xreq, blkif_request_t * req)
+{   /* copy *req to *xreq, converting each segment frame machine -> pseudo-phys */
+    int i;
+    
+    *xreq=*req; 
+    for ( i=0; i<req->nr_segments; i++ )
+    {  
+       xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
+           (machine_to_phys_mapping[req->frame_and_sects[i]>>PAGE_SHIFT]<<PAGE_SHIFT); 
+    }
+    return xreq;  /* was 'inline void' yet returned xreq: C constraint violation */
+}
+
+inline blkif_request_t *translate_req_to_mfn( blkif_request_t * xreq, blkif_request_t * req)
+{   /* copy *req to *xreq, converting each segment frame pseudo-phys -> machine */
+    int i;
+
+    *xreq=*req; 
+    for ( i=0; i<req->nr_segments; i++ )
+    {  
+       xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
+           (phys_to_machine_mapping[req->frame_and_sects[i]>>PAGE_SHIFT]<<PAGE_SHIFT);
+    }
+    return xreq;  /* was 'inline void' yet returned xreq: C constraint violation */
+}
+
+
 static inline void flush_requests(void)
 {
     DISABLE_SCATTERGATHER();
@@ -364,8 +392,8 @@ static int blkif_queue_request(unsigned long   id,
                 DISABLE_SCATTERGATHER();
 
             /* Update the copy of the request in the recovery ring. */
-            blk_ring_rec->ring[MASK_BLKIF_IDX(blk_ring_rec->req_prod - 1)].req
-                = *req;
+           translate_req_to_pfn(&blk_ring_rec->ring[
+               MASK_BLKIF_IDX(blk_ring_rec->req_prod - 1)].req, req);
 
             return 0;
         }
@@ -395,8 +423,9 @@ static int blkif_queue_request(unsigned long   id,
     req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
     req_prod++;
 
-    /* Keep a private copy so we can reissue requests when recovering. */
-    blk_ring_rec->ring[MASK_BLKIF_IDX(blk_ring_rec->req_prod)].req = *req;
+    /* Keep a private copy so we can reissue requests when recovering. */    
+    translate_req_to_pfn(&blk_ring_rec->ring[
+       MASK_BLKIF_IDX(blk_ring_rec->req_prod)].req, req);
     blk_ring_rec->req_prod++;
 
     return 0;
@@ -570,9 +599,11 @@ void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
     }
 
     DISABLE_SCATTERGATHER();
-    memcpy(&blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req, req, sizeof(*req));
-    memcpy(&blk_ring_rec->ring[MASK_BLKIF_IDX(blk_ring_rec->req_prod++)].req,
-           req, sizeof(*req));
+    blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req = *req;
+    
+    translate_req_to_pfn(&blk_ring_rec->ring[
+       MASK_BLKIF_IDX(blk_ring_rec->req_prod++)].req,req);
+
     req_prod++;
     flush_requests();
 
@@ -660,7 +691,7 @@ static void blkif_status_change(blkif_fe_interface_status_changed_t *status)
 
         if ( recovery )
         {
-            int i;
+            int i,j;
 
            /* Shouldn't need the io_request_lock here - the device is
             * plugged and the recovery flag prevents the interrupt handler
@@ -670,18 +701,24 @@ static void blkif_status_change(blkif_fe_interface_status_changed_t *status)
             for ( i = 0;
                  resp_cons_rec < blk_ring_rec->req_prod;
                   resp_cons_rec++, i++ )
-            {
-                blk_ring->ring[i].req
-                    = blk_ring_rec->ring[MASK_BLKIF_IDX(resp_cons_rec)].req;
+            {                
+                translate_req_to_mfn(&blk_ring->ring[i].req,
+                                    &blk_ring_rec->ring[
+                                        MASK_BLKIF_IDX(resp_cons_rec)].req);
             }
 
-            /* Reset the private block ring to match the new ring. */
-            memcpy(blk_ring_rec, blk_ring, sizeof(*blk_ring));
+            /* Reset the private block ring to match the new ring. */      
+           for( j=0; j<i; j++ )
+           {           
+               translate_req_to_pfn(
+                   &blk_ring_rec->ring[j].req,
+                   &blk_ring->ring[j].req);
+           }
+
             resp_cons_rec = 0;
 
             /* blk_ring->req_prod will be set when we flush_requests().*/
             blk_ring_rec->req_prod = req_prod = i;
-
             wmb();
 
             /* Switch off recovery mode, using a memory barrier to ensure that
@@ -806,8 +843,7 @@ void blkdev_suspend(void)
 void blkdev_resume(void)
 {
     ctrl_msg_t                       cmsg;
-    blkif_fe_driver_status_changed_t st;
-    
+    blkif_fe_driver_status_changed_t st;    
 
     /* Send a driver-UP notification to the domain controller. */
     cmsg.type      = CMSG_BLKIF_FE;
index 6acd49bc0606c1e7a78521352fac9a4212c6a08b..9b740aaaa8cf5632f3b3b0f3430186b0fbf6cc1d 100644 (file)
@@ -1197,11 +1197,12 @@ static int shutting_down = -1;
 
 static void __do_suspend(void)
 {
+    int i,j;
     /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
     extern void blkdev_suspend(void);
     extern void blkdev_resume(void);
-    extern void netif_resume(void);
-    
+    extern void netif_suspend(void);
+    extern void netif_resume(void);    
     extern void time_suspend(void);
     extern void time_resume(void);
 
@@ -1213,11 +1214,12 @@ static void __do_suspend(void)
 
     suspend_record->nr_pfns = max_pfn; /* final number of pfns */
 
-    //netdev_suspend();
-    //blkdev_suspend();
-
     __cli();
 
+    netif_suspend();
+
+    blkdev_suspend();
+
     time_suspend();
 
     ctrl_if_suspend();
@@ -1241,17 +1243,28 @@ static void __do_suspend(void)
 
     memset(empty_zero_page, 0, PAGE_SIZE);
 
+    for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+    {  
+        pfn_to_mfn_frame_list[j] = 
+            virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+    }
+    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+       virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+
+
     irq_resume();
 
     ctrl_if_resume();
 
     time_resume();
 
-    __sti();
-
     blkdev_resume();
+
     netif_resume();
 
+    __sti();
+
+
  out:
     if ( suspend_record != NULL )
         free_page((unsigned long)suspend_record);
index 01447b1de835b99b96cca1642a1497d0e4b30d63..76d23171b24140f2476d37b764433bd9be84e276 100644 (file)
@@ -241,6 +241,8 @@ void netif_connect(netif_be_connect_t *connect)
     netif->status         = CONNECTED;
     netif_get(netif);
 
+    netif->tx->resp_prod = netif->rx->resp_prod = 0;
+
     rtnl_lock();
     (void)dev_open(netif->dev);
     rtnl_unlock();
index 368feadc5513ab9edc596a67b83642593be6aeaa..a9b59505bc6cc15a42e60731d666711c858ecb7e 100644 (file)
@@ -546,7 +546,7 @@ static void network_connect(struct net_device *dev,
 
     /* Step 1: Reinitialise variables. */
     np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
-    np->rx->event = 1;
+    np->rx->event = np->tx->event = 1;
 
     /* Step 2: Rebuild the RX and TX ring contents.
      * NB. We could just free the queued TX packets now but we hope
@@ -776,12 +776,13 @@ static int create_netdev(int handle, struct net_device **val)
  * Initialize the network control interface. Set the number of network devices
  * and create them.
  */
+
 static void netif_driver_status_change(
     netif_fe_driver_status_changed_t *status)
 {
     int err = 0;
     int i;
-    
+
     netctrl.interface_n = status->nr_interfaces;
     netctrl.connected_n = 0;
 
@@ -874,11 +875,75 @@ static int __init netif_init(void)
     return err;
 }
 
+void netif_suspend(void)
+{   /* quiesce net frontends before suspend: detach ring irqs so no tx/rx runs */
+#if 1 /* XXX THIS IS TEMPORARY */
+    struct net_device *dev = NULL;
+    struct net_private *np = NULL;
+    int i;
+
+/* avoid having tx/rx stuff happen until we're ready */
+
+    for(i=0;i<netctrl.interface_n;i++)  /* walk every probed interface by name */
+    {
+       char name[32];
+
+       sprintf(name,"eth%d",i);  /* assumes vifs are named eth0..ethN — confirm */
+       dev = __dev_get_by_name(name);
+
+       if ( dev && (dev->flags & IFF_UP) )  /* only devices currently brought up */
+       {
+           np  = dev->priv;
+
+           free_irq(np->irq, dev);  /* stop delivery of ring interrupts */
+            unbind_evtchn_from_irq(np->evtchn);  /* release event-channel binding */
+       }    
+    }
+#endif
+}
+
 void netif_resume(void)
 {
     ctrl_msg_t                       cmsg;
-    netif_fe_driver_status_changed_t st;
+    netif_fe_interface_connect_t     up;
+//    netif_fe_driver_status_changed_t   st;
+    struct net_device *dev = NULL;
+    struct net_private *np = NULL;
+    int i;
+
+#if 1
+    /* XXX THIS IS TEMPORARY */
+
+    for(i=0;i<netctrl.interface_n;i++)    
+    {
+       char name[32];
 
+       sprintf(name,"eth%d",i);
+       dev = __dev_get_by_name(name);
+
+       if ( dev ) // connect regardless of whether IFF_UP flag set
+       {
+           np  = dev->priv;
+
+           // stop bad things from happening until we're back up
+           np->backend_state = BEST_DISCONNECTED;
+
+           cmsg.type      = CMSG_NETIF_FE;
+           cmsg.subtype   = CMSG_NETIF_FE_INTERFACE_CONNECT;
+           cmsg.length    = sizeof(netif_fe_interface_connect_t);
+           up.handle      = np->handle;
+           up.tx_shmem_frame = virt_to_machine(np->tx) >> PAGE_SHIFT;
+           up.rx_shmem_frame = virt_to_machine(np->rx) >> PAGE_SHIFT;
+           memcpy(cmsg.msg, &up, sizeof(up));
+
+           /* Tell the controller to bring up the interface. */
+           ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+       }
+    }
+#endif     
+
+
+#if 0
     /* Send a driver-UP notification to the domain controller. */
     cmsg.type      = CMSG_NETIF_FE;
     cmsg.subtype   = CMSG_NETIF_FE_DRIVER_STATUS_CHANGED;
@@ -887,6 +952,8 @@ void netif_resume(void)
     st.nr_interfaces = 0;
     memcpy(cmsg.msg, &st, sizeof(st));
     ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+#endif
+
 
 }